In [1]:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#   LEARN FCN00
#

from __future__ import print_function
import argparse
import os

import numpy as np
import pickle
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate
from keras.layers import merge
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import list_pictures, array_to_img

from image_ext import list_pictures_in_multidir, load_imgs_asarray, img_dice_coeff
from create_fcn import create_fcn01, create_fcn02, create_fcn00, create_pupil_net

np.random.seed(2016)
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
/home/nakazawa_atsushi/anaconda3/envs/py3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)
In [2]:
def dice_coef(y_true, y_pred):
    # Smoothed Dice coefficient: 2|A∩B| / (|A| + |B|), with +1 added to the
    # numerator and denominator so the value stays defined on empty masks.
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    intersection = K.sum(y_true * y_pred)
    return (2.*intersection + 1) / (K.sum(y_true) + K.sum(y_pred) + 1)

def dice_coef_loss(y_true, y_pred):
    # Negate the Dice coefficient so that minimizing the loss maximizes overlap.
    return -dice_coef(y_true, y_pred)
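
The metric above is the smoothed Dice coefficient, Dice(A, B) = (2|A∩B| + 1) / (|A| + |B| + 1). As a quick sanity check (a minimal NumPy sketch, not part of the training script), the same formula on toy arrays:

import numpy as np

def dice_np(y_true, y_pred):
    # Same smoothed formula as dice_coef above, on plain NumPy arrays.
    y_true = y_true.ravel().astype(np.float32)
    y_pred = y_pred.ravel().astype(np.float32)
    intersection = np.sum(y_true * y_pred)
    return (2. * intersection + 1) / (np.sum(y_true) + np.sum(y_pred) + 1)

a = np.array([[1, 1, 0], [0, 1, 0]])
print(dice_np(a, a))                 # identical masks: (2*3 + 1) / (3 + 3 + 1) = 1.0
print(dice_np(a, np.zeros_like(a)))  # empty prediction: (0 + 1) / (3 + 0 + 1) = 0.25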
In [3]:
def load_fnames(paths):
    # Read a newline-separated list of file names.
    with open(paths) as f:
        lines = f.read().split('\n')
    # The file ends with a newline, so the last element is empty; drop it.
    del lines[-1]
    return lines
In [4]:
def make_fnames(fnames, fpath, fpath_mask, mask_ext):
    # Build parallel lists of image and mask paths; mask files share the image
    # file name but carry an annotator prefix (e.g. 'OperatorA_').
    fnames_img = []
    fnames_mask = []

    for fname in fnames:
        fnames_img.append(fpath + '/' + fname)
        fnames_mask.append(fpath_mask + '/' + mask_ext + fname)

    return [fnames_img, fnames_mask]
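
For illustration, with hypothetical file names the helper pairs each image with its annotator-prefixed mask:

# Hypothetical example (file names invented for illustration):
imgs, masks = make_fnames(['eye001.png', 'eye002.png'], 'data/img', 'data/mask', 'OperatorA_')
print(imgs)   # ['data/img/eye001.png', 'data/img/eye002.png']
print(masks)  # ['data/mask/OperatorA_eye001.png', 'data/mask/OperatorA_eye002.png']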
In [5]:
#
#  MAIN STARTS FROM HERE
#
if __name__ == '__main__':
    
    target_size = (224, 224)
    dpath_this = './'
    dname_checkpoints = 'checkpoints_fcn00'
    dname_checkpoints_fcn01 = 'checkpoints_fcn01'
    dname_outputs = 'outputs'
    fname_architecture = 'architecture.json'
    fname_weights = "model_weights_{epoch:02d}.h5"
    fname_stats = 'stats01.npz'
    dim_ordering = 'channels_first'
    fname_history = "history.pkl"

    # Definition of the mode: LEARN, TEST, or SHOW_HISTORY
    mode = "LEARN"
    #mode = "SHOW_HISTORY"
    #mode = "TEST"

    # Create the models
    print('creating model fcn00 and fcn01...')
    #model_fcn02 = create_fcn02(target_size)
    model_fcn01 = create_fcn01(target_size)
    model_fcn00 = create_fcn00(target_size)
    
    if not os.path.exists(dname_checkpoints):
        os.mkdir(dname_checkpoints)
creating model fcn00 and fcn01...
In [6]:
#
#   LEARNING MODE
#
if mode == "LEARN":
    # Read Learning Data
    fnames = load_fnames('data/list_train_01.txt')
    [fpaths_xs_train,fpaths_ys_train] = make_fnames(fnames,'data/img','data/mask','OperatorA_')

    X_train = load_imgs_asarray(fpaths_xs_train, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_train = load_imgs_asarray(fpaths_ys_train, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering) 

    # Read Validation Data
    fnames = load_fnames('data/list_valid_01.txt')
    [fpaths_xs_valid,fpaths_ys_valid] = make_fnames(fnames,'data/img','data/mask','OperatorA_')

    X_valid = load_imgs_asarray(fpaths_xs_valid, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    Y_valid = load_imgs_asarray(fpaths_ys_valid, grayscale=True, target_size=target_size,
                                dim_ordering=dim_ordering)     

    print('==> ' + str(len(X_train)) + ' training images loaded')
    print('==> ' + str(len(Y_train)) + ' training masks loaded')
    print('==> ' + str(len(X_valid)) + ' validation images loaded')
    print('==> ' + str(len(Y_valid)) + ' validation masks loaded')

    # Preprocessing
    print('computing mean and standard deviation...')
    mean = np.mean(X_train, axis=(0, 2, 3))
    std = np.std(X_train, axis=(0, 2, 3))
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    print('saving mean and standard deviation to ' + fname_stats + '...')
    stats = {'mean': mean, 'std': std}
    np.savez(dname_checkpoints + '/' + fname_stats, **stats)
    print('==> done')

    print('globally normalizing data...')
    for i in range(3):
        X_train[:, i] = (X_train[:, i] - mean[i]) / std[i]
        X_valid[:, i] = (X_valid[:, i] - mean[i]) / std[i]
    Y_train /= 255
    Y_valid /= 255
    print('==> done')
==> 1452 training images loaded
==> 1452 training masks loaded
==> 527 validation images loaded
==> 527 validation masks loaded
computing mean and standard deviation...
==> mean: [130.65465  91.2685   76.63643]
==> std : [55.2817   43.990963 43.113483]
saving mean and standard deviation to stats01.npz...
==> done
globally normalizing data...
==> done
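
The per-channel loop above can equivalently be written with NumPy broadcasting; a sketch, assuming the channels_first (N, C, H, W) layout used throughout:

# Broadcasting alternative to the per-channel loop (run one or the other, not both):
mean_b = mean.reshape(1, 3, 1, 1)
std_b = std.reshape(1, 3, 1, 1)
X_train = (X_train - mean_b) / std_b
X_valid = (X_valid - mean_b) / std_b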
In [8]:
    # Load the pretrained fcn01 weights into the model
    epoch = 200
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights_fcn01 = os.path.join(dname_checkpoints_fcn01, fname_weights)
    model_fcn01.load_weights(fpath_weights_fcn01)
    #print('==> done')

    # Copy weights from the trained U-Net (fcn01). The full fcn01 layer set is
    # kept below for reference; only the layers shared with the shallower
    # fcn00 are copied.
    #layer_names = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2',
    #               'conv4_1', 'conv4_2', 'conv5_1', 'conv5_2',
    #               'up1_1', 'up1_2', 'up2_1', 'up2_2', 'up3_1', 'up3_2', 'up4_1',
    #               'up4_2', 'conv_fin']
    layer_names = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2',
                   'up1_1', 'up1_2', 'up2_1', 'up2_2', 'conv_fin']
    
    print('copying layer weights')
    for name in layer_names:
        print(name)
        model_fcn00.get_layer(name).set_weights(model_fcn01.get_layer(name).get_weights())
        model_fcn00.get_layer(name).trainable = True
copying layer weights
conv1_1
conv1_2
conv2_1
conv2_2
up1_1
up1_2
up2_1
up2_2
conv_fin
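
The copied layers are left trainable here, so the whole fcn00 is fine-tuned together with its new layers. A variant (a sketch, not what this run does) would freeze the transferred weights and train only the remaining layers; note that trainable must be set before compile() to take effect:

# Variant: freeze the transferred layers instead of fine-tuning them.
for name in layer_names:
    model_fcn00.get_layer(name).trainable = False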
In [9]:
    # Define the loss function and the optimizer
    adam = Adam(lr=1e-5)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.1, nesterov=True)
    #rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
    model_fcn00.compile(optimizer=adam, loss=dice_coef_loss, metrics=[dice_coef])

    # Make sure the directory for saving the architecture and weights exists
    dpath_checkpoints = os.path.join(dpath_this, dname_checkpoints)
    if not os.path.isdir(dpath_checkpoints):
        os.mkdir(dpath_checkpoints)

    # Prepare a callback that saves the model weights
    fname_weights = "model_weights_{epoch:02d}.h5"
    fpath_weights = os.path.join(dpath_checkpoints, fname_weights)
    checkpointer = ModelCheckpoint(filepath=fpath_weights, save_best_only=False)      
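
With save_best_only=False a weight file is written after every epoch (200 files for this run). An alternative sketch using standard Keras callbacks would keep only the best validation weights and stop once validation loss stalls (the file name below is illustrative):

from keras.callbacks import EarlyStopping

best_ckpt = ModelCheckpoint(filepath=os.path.join(dpath_checkpoints, 'model_weights_best.h5'),
                            monitor='val_loss', save_best_only=True)
early_stop = EarlyStopping(monitor='val_loss', patience=20)
# Pass callbacks=[best_ckpt, early_stop] to fit() instead of [checkpointer].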
In [10]:
    # Start training
    print('start training...')
    history = model_fcn00.fit(X_train, Y_train, batch_size=64, epochs=200, verbose=1,
                              shuffle=True, validation_data=(X_valid, Y_valid),
                              callbacks=[checkpointer])
start training...
Train on 1452 samples, validate on 527 samples
Epoch 1/200
1452/1452 [==============================] - 98s 68ms/step - loss: -0.0396 - dice_coef: 0.0396 - val_loss: -0.0640 - val_dice_coef: 0.0640
Epoch 2/200
1452/1452 [==============================] - 66s 46ms/step - loss: -0.1457 - dice_coef: 0.1457 - val_loss: -0.3963 - val_dice_coef: 0.3963
Epoch 3/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.5585 - dice_coef: 0.5585 - val_loss: -0.6822 - val_dice_coef: 0.6822
Epoch 4/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.6837 - dice_coef: 0.6837 - val_loss: -0.7261 - val_dice_coef: 0.7261
Epoch 5/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7327 - dice_coef: 0.7327 - val_loss: -0.7499 - val_dice_coef: 0.7499
Epoch 6/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7545 - dice_coef: 0.7545 - val_loss: -0.7713 - val_dice_coef: 0.7713
Epoch 7/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7735 - dice_coef: 0.7735 - val_loss: -0.7841 - val_dice_coef: 0.7841
Epoch 8/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7707 - dice_coef: 0.7707 - val_loss: -0.7895 - val_dice_coef: 0.7895
Epoch 9/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7832 - dice_coef: 0.7832 - val_loss: -0.7970 - val_dice_coef: 0.7970
Epoch 10/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7952 - dice_coef: 0.7952 - val_loss: -0.7890 - val_dice_coef: 0.7890
Epoch 11/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.7997 - dice_coef: 0.7997 - val_loss: -0.8138 - val_dice_coef: 0.8138
Epoch 12/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8090 - dice_coef: 0.8090 - val_loss: -0.8145 - val_dice_coef: 0.8145
Epoch 13/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8095 - dice_coef: 0.8095 - val_loss: -0.8103 - val_dice_coef: 0.8103
Epoch 14/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8130 - dice_coef: 0.8130 - val_loss: -0.8118 - val_dice_coef: 0.8118
Epoch 15/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8221 - dice_coef: 0.8221 - val_loss: -0.8159 - val_dice_coef: 0.8159
Epoch 16/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8172 - dice_coef: 0.8172 - val_loss: -0.8175 - val_dice_coef: 0.8175
Epoch 17/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8094 - dice_coef: 0.8094 - val_loss: -0.8050 - val_dice_coef: 0.8050
Epoch 18/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8053 - dice_coef: 0.8053 - val_loss: -0.8127 - val_dice_coef: 0.8127
Epoch 19/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8233 - dice_coef: 0.8233 - val_loss: -0.8292 - val_dice_coef: 0.8292
Epoch 20/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8287 - dice_coef: 0.8287 - val_loss: -0.8194 - val_dice_coef: 0.8194
Epoch 21/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8214 - dice_coef: 0.8214 - val_loss: -0.8106 - val_dice_coef: 0.8106
Epoch 22/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8273 - dice_coef: 0.8273 - val_loss: -0.8181 - val_dice_coef: 0.8181
Epoch 23/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8346 - dice_coef: 0.8346 - val_loss: -0.8355 - val_dice_coef: 0.8355
Epoch 24/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8390 - dice_coef: 0.8390 - val_loss: -0.8365 - val_dice_coef: 0.8365
Epoch 25/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8332 - dice_coef: 0.8332 - val_loss: -0.8303 - val_dice_coef: 0.8303
Epoch 26/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8323 - dice_coef: 0.8323 - val_loss: -0.8408 - val_dice_coef: 0.8408
Epoch 27/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8389 - dice_coef: 0.8389 - val_loss: -0.8392 - val_dice_coef: 0.8392
Epoch 28/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8449 - dice_coef: 0.8449 - val_loss: -0.8404 - val_dice_coef: 0.8404
Epoch 29/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8467 - dice_coef: 0.8467 - val_loss: -0.8415 - val_dice_coef: 0.8415
Epoch 30/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8430 - dice_coef: 0.8430 - val_loss: -0.8401 - val_dice_coef: 0.8401
Epoch 31/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8413 - dice_coef: 0.8413 - val_loss: -0.8093 - val_dice_coef: 0.8093
Epoch 32/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8472 - dice_coef: 0.8472 - val_loss: -0.8445 - val_dice_coef: 0.8445
Epoch 33/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8517 - dice_coef: 0.8517 - val_loss: -0.8391 - val_dice_coef: 0.8391
Epoch 34/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8526 - dice_coef: 0.8526 - val_loss: -0.8452 - val_dice_coef: 0.8452
Epoch 35/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8516 - dice_coef: 0.8516 - val_loss: -0.8436 - val_dice_coef: 0.8436
Epoch 36/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8555 - dice_coef: 0.8555 - val_loss: -0.8393 - val_dice_coef: 0.8393
Epoch 37/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8557 - dice_coef: 0.8557 - val_loss: -0.8467 - val_dice_coef: 0.8467
Epoch 38/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8561 - dice_coef: 0.8561 - val_loss: -0.8431 - val_dice_coef: 0.8431
Epoch 39/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8571 - dice_coef: 0.8571 - val_loss: -0.8343 - val_dice_coef: 0.8343
Epoch 40/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8563 - dice_coef: 0.8563 - val_loss: -0.8469 - val_dice_coef: 0.8469
Epoch 41/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8557 - dice_coef: 0.8557 - val_loss: -0.8457 - val_dice_coef: 0.8457
Epoch 42/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8616 - dice_coef: 0.8616 - val_loss: -0.8443 - val_dice_coef: 0.8443
Epoch 43/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8597 - dice_coef: 0.8597 - val_loss: -0.8366 - val_dice_coef: 0.8366
Epoch 44/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8567 - dice_coef: 0.8567 - val_loss: -0.8469 - val_dice_coef: 0.8469
Epoch 45/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8645 - dice_coef: 0.8645 - val_loss: -0.8394 - val_dice_coef: 0.8394
Epoch 46/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8645 - dice_coef: 0.8645 - val_loss: -0.8419 - val_dice_coef: 0.8419
Epoch 47/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8649 - dice_coef: 0.8649 - val_loss: -0.8518 - val_dice_coef: 0.8518
Epoch 48/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8649 - dice_coef: 0.8649 - val_loss: -0.8462 - val_dice_coef: 0.8462
Epoch 49/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8648 - dice_coef: 0.8648 - val_loss: -0.8328 - val_dice_coef: 0.8328
Epoch 50/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8543 - dice_coef: 0.8543 - val_loss: -0.8464 - val_dice_coef: 0.8464
Epoch 51/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8588 - dice_coef: 0.8588 - val_loss: -0.8445 - val_dice_coef: 0.8445
Epoch 52/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8621 - dice_coef: 0.8621 - val_loss: -0.8521 - val_dice_coef: 0.8521
Epoch 53/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8659 - dice_coef: 0.8659 - val_loss: -0.8490 - val_dice_coef: 0.8490
Epoch 54/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8650 - dice_coef: 0.8650 - val_loss: -0.8508 - val_dice_coef: 0.8508
Epoch 55/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8678 - dice_coef: 0.8678 - val_loss: -0.8490 - val_dice_coef: 0.8490
Epoch 56/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8706 - dice_coef: 0.8706 - val_loss: -0.8471 - val_dice_coef: 0.8471
Epoch 57/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8725 - dice_coef: 0.8725 - val_loss: -0.8501 - val_dice_coef: 0.8501
Epoch 58/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8693 - dice_coef: 0.8693 - val_loss: -0.8480 - val_dice_coef: 0.8480
Epoch 59/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8734 - dice_coef: 0.8734 - val_loss: -0.8496 - val_dice_coef: 0.8496
Epoch 60/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8737 - dice_coef: 0.8737 - val_loss: -0.8530 - val_dice_coef: 0.8530
Epoch 61/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8770 - dice_coef: 0.8770 - val_loss: -0.8454 - val_dice_coef: 0.8454
Epoch 62/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8724 - dice_coef: 0.8724 - val_loss: -0.8536 - val_dice_coef: 0.8536
Epoch 63/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8790 - dice_coef: 0.8790 - val_loss: -0.8500 - val_dice_coef: 0.8500
Epoch 64/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8805 - dice_coef: 0.8805 - val_loss: -0.8516 - val_dice_coef: 0.8516
Epoch 65/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8727 - dice_coef: 0.8727 - val_loss: -0.8486 - val_dice_coef: 0.8486
Epoch 66/200
1452/1452 [==============================] - 65s 45ms/step - loss: -0.8804 - dice_coef: 0.8804 - val_loss: -0.8452 - val_dice_coef: 0.8452
Epoch 67/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.8818 - dice_coef: 0.8818 - val_loss: -0.8530 - val_dice_coef: 0.8530
Epoch 68/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8848 - dice_coef: 0.8848 - val_loss: -0.8563 - val_dice_coef: 0.8563
Epoch 69/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8819 - dice_coef: 0.8819 - val_loss: -0.8567 - val_dice_coef: 0.8567
Epoch 70/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8795 - dice_coef: 0.8795 - val_loss: -0.8430 - val_dice_coef: 0.8430
Epoch 71/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8863 - dice_coef: 0.8863 - val_loss: -0.8541 - val_dice_coef: 0.8541
Epoch 72/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8877 - dice_coef: 0.8877 - val_loss: -0.8535 - val_dice_coef: 0.8535
Epoch 73/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8807 - dice_coef: 0.8807 - val_loss: -0.8485 - val_dice_coef: 0.8485
Epoch 74/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8871 - dice_coef: 0.8871 - val_loss: -0.8385 - val_dice_coef: 0.8385
Epoch 75/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8823 - dice_coef: 0.8823 - val_loss: -0.8442 - val_dice_coef: 0.8442
Epoch 76/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8899 - dice_coef: 0.8899 - val_loss: -0.8486 - val_dice_coef: 0.8486
Epoch 77/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8918 - dice_coef: 0.8918 - val_loss: -0.8544 - val_dice_coef: 0.8544
Epoch 78/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8913 - dice_coef: 0.8913 - val_loss: -0.8562 - val_dice_coef: 0.8562
Epoch 79/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8909 - dice_coef: 0.8909 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 80/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8963 - dice_coef: 0.8963 - val_loss: -0.8536 - val_dice_coef: 0.8536
Epoch 81/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8988 - dice_coef: 0.8988 - val_loss: -0.8505 - val_dice_coef: 0.8505
Epoch 82/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8955 - dice_coef: 0.8955 - val_loss: -0.8260 - val_dice_coef: 0.8260
Epoch 83/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8941 - dice_coef: 0.8941 - val_loss: -0.8537 - val_dice_coef: 0.8537
Epoch 84/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8906 - dice_coef: 0.8906 - val_loss: -0.8585 - val_dice_coef: 0.8585
Epoch 85/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8927 - dice_coef: 0.8927 - val_loss: -0.8568 - val_dice_coef: 0.8568
Epoch 86/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8973 - dice_coef: 0.8973 - val_loss: -0.8576 - val_dice_coef: 0.8576
Epoch 87/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9005 - dice_coef: 0.9005 - val_loss: -0.8488 - val_dice_coef: 0.8488
Epoch 88/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8930 - dice_coef: 0.8930 - val_loss: -0.8497 - val_dice_coef: 0.8497
Epoch 89/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8923 - dice_coef: 0.8923 - val_loss: -0.8563 - val_dice_coef: 0.8563
Epoch 90/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8995 - dice_coef: 0.8995 - val_loss: -0.8584 - val_dice_coef: 0.8584
Epoch 91/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9025 - dice_coef: 0.9025 - val_loss: -0.8527 - val_dice_coef: 0.8527
Epoch 92/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.8947 - dice_coef: 0.8947 - val_loss: -0.8563 - val_dice_coef: 0.8563
Epoch 93/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9030 - dice_coef: 0.9030 - val_loss: -0.8466 - val_dice_coef: 0.8466
Epoch 94/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9045 - dice_coef: 0.9045 - val_loss: -0.8477 - val_dice_coef: 0.8477
Epoch 95/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9055 - dice_coef: 0.9055 - val_loss: -0.8557 - val_dice_coef: 0.8557
Epoch 96/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9071 - dice_coef: 0.9071 - val_loss: -0.8500 - val_dice_coef: 0.8500
Epoch 97/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9080 - dice_coef: 0.9080 - val_loss: -0.8591 - val_dice_coef: 0.8591
Epoch 98/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9042 - dice_coef: 0.9042 - val_loss: -0.8564 - val_dice_coef: 0.8564
Epoch 99/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9122 - dice_coef: 0.9122 - val_loss: -0.8593 - val_dice_coef: 0.8593
Epoch 100/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9131 - dice_coef: 0.9131 - val_loss: -0.8484 - val_dice_coef: 0.8484
Epoch 101/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9142 - dice_coef: 0.9142 - val_loss: -0.8570 - val_dice_coef: 0.8570
Epoch 102/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9170 - dice_coef: 0.9170 - val_loss: -0.8609 - val_dice_coef: 0.8609
Epoch 103/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9141 - dice_coef: 0.9141 - val_loss: -0.8558 - val_dice_coef: 0.8558
Epoch 104/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9109 - dice_coef: 0.9109 - val_loss: -0.8510 - val_dice_coef: 0.8510
Epoch 105/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9165 - dice_coef: 0.9165 - val_loss: -0.8571 - val_dice_coef: 0.8571
Epoch 106/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9166 - dice_coef: 0.9166 - val_loss: -0.8447 - val_dice_coef: 0.8447
Epoch 107/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9191 - dice_coef: 0.9191 - val_loss: -0.8574 - val_dice_coef: 0.8574
Epoch 108/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9146 - dice_coef: 0.9146 - val_loss: -0.8508 - val_dice_coef: 0.8508
Epoch 109/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9173 - dice_coef: 0.9173 - val_loss: -0.8579 - val_dice_coef: 0.8579
Epoch 110/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9221 - dice_coef: 0.9221 - val_loss: -0.8601 - val_dice_coef: 0.8601
Epoch 111/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9191 - dice_coef: 0.9191 - val_loss: -0.8324 - val_dice_coef: 0.8324
Epoch 112/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9191 - dice_coef: 0.9191 - val_loss: -0.8491 - val_dice_coef: 0.8491
Epoch 113/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9243 - dice_coef: 0.9243 - val_loss: -0.8491 - val_dice_coef: 0.8491
Epoch 114/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9257 - dice_coef: 0.9257 - val_loss: -0.8528 - val_dice_coef: 0.8528
Epoch 115/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9165 - dice_coef: 0.9165 - val_loss: -0.8523 - val_dice_coef: 0.8523
Epoch 116/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9245 - dice_coef: 0.9245 - val_loss: -0.8338 - val_dice_coef: 0.8338
Epoch 117/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9233 - dice_coef: 0.9233 - val_loss: -0.8579 - val_dice_coef: 0.8579
Epoch 118/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9174 - dice_coef: 0.9174 - val_loss: -0.8553 - val_dice_coef: 0.8553
Epoch 119/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9153 - dice_coef: 0.9153 - val_loss: -0.8584 - val_dice_coef: 0.8584
Epoch 120/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9188 - dice_coef: 0.9188 - val_loss: -0.8523 - val_dice_coef: 0.8523
Epoch 121/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9254 - dice_coef: 0.9254 - val_loss: -0.8571 - val_dice_coef: 0.8571
Epoch 122/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9291 - dice_coef: 0.9291 - val_loss: -0.8587 - val_dice_coef: 0.8587
Epoch 123/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9249 - dice_coef: 0.9249 - val_loss: -0.8552 - val_dice_coef: 0.8552
Epoch 124/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9251 - dice_coef: 0.9251 - val_loss: -0.8561 - val_dice_coef: 0.8561
Epoch 125/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9298 - dice_coef: 0.9298 - val_loss: -0.8557 - val_dice_coef: 0.8557
Epoch 126/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9325 - dice_coef: 0.9325 - val_loss: -0.8585 - val_dice_coef: 0.8585
Epoch 127/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9319 - dice_coef: 0.9319 - val_loss: -0.8594 - val_dice_coef: 0.8594
Epoch 128/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9329 - dice_coef: 0.9329 - val_loss: -0.8398 - val_dice_coef: 0.8398
Epoch 129/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9339 - dice_coef: 0.9339 - val_loss: -0.8544 - val_dice_coef: 0.8544
Epoch 130/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9347 - dice_coef: 0.9347 - val_loss: -0.8546 - val_dice_coef: 0.8546
Epoch 131/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9316 - dice_coef: 0.9316 - val_loss: -0.8541 - val_dice_coef: 0.8541
Epoch 132/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9362 - dice_coef: 0.9362 - val_loss: -0.8512 - val_dice_coef: 0.8512
Epoch 133/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9357 - dice_coef: 0.9357 - val_loss: -0.8472 - val_dice_coef: 0.8472
Epoch 134/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9338 - dice_coef: 0.9338 - val_loss: -0.8489 - val_dice_coef: 0.8489
Epoch 135/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9364 - dice_coef: 0.9364 - val_loss: -0.8574 - val_dice_coef: 0.8574
Epoch 136/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9373 - dice_coef: 0.9373 - val_loss: -0.8584 - val_dice_coef: 0.8584
Epoch 137/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9319 - dice_coef: 0.9319 - val_loss: -0.8346 - val_dice_coef: 0.8346
Epoch 138/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9361 - dice_coef: 0.9361 - val_loss: -0.8569 - val_dice_coef: 0.8569
Epoch 139/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9408 - dice_coef: 0.9408 - val_loss: -0.8489 - val_dice_coef: 0.8489
Epoch 140/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9408 - dice_coef: 0.9408 - val_loss: -0.8578 - val_dice_coef: 0.8578
Epoch 141/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9413 - dice_coef: 0.9413 - val_loss: -0.8555 - val_dice_coef: 0.8555
Epoch 142/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9433 - dice_coef: 0.9433 - val_loss: -0.8534 - val_dice_coef: 0.8534
Epoch 143/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9407 - dice_coef: 0.9407 - val_loss: -0.8554 - val_dice_coef: 0.8554
Epoch 144/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9402 - dice_coef: 0.9402 - val_loss: -0.8567 - val_dice_coef: 0.8567
Epoch 145/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9424 - dice_coef: 0.9424 - val_loss: -0.8538 - val_dice_coef: 0.8538
Epoch 146/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9441 - dice_coef: 0.9441 - val_loss: -0.8480 - val_dice_coef: 0.8480
Epoch 147/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9331 - dice_coef: 0.9331 - val_loss: -0.8540 - val_dice_coef: 0.8540
Epoch 148/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9310 - dice_coef: 0.9310 - val_loss: -0.8504 - val_dice_coef: 0.8504
Epoch 149/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9400 - dice_coef: 0.9400 - val_loss: -0.8459 - val_dice_coef: 0.8459
Epoch 150/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9459 - dice_coef: 0.9459 - val_loss: -0.8504 - val_dice_coef: 0.8504
Epoch 151/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9468 - dice_coef: 0.9468 - val_loss: -0.8519 - val_dice_coef: 0.8519
Epoch 152/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9492 - dice_coef: 0.9492 - val_loss: -0.8577 - val_dice_coef: 0.8577
Epoch 153/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9469 - dice_coef: 0.9469 - val_loss: -0.8505 - val_dice_coef: 0.8505
Epoch 154/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9456 - dice_coef: 0.9456 - val_loss: -0.8522 - val_dice_coef: 0.8522
Epoch 155/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9340 - dice_coef: 0.9340 - val_loss: -0.8447 - val_dice_coef: 0.8447
Epoch 156/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9455 - dice_coef: 0.9455 - val_loss: -0.8545 - val_dice_coef: 0.8545
Epoch 157/200
1452/1452 [==============================] - 64s 44ms/step - loss: -0.9481 - dice_coef: 0.9481 - val_loss: -0.8523 - val_dice_coef: 0.8523
Epoch 158/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9489 - dice_coef: 0.9489 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 159/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9481 - dice_coef: 0.9481 - val_loss: -0.8483 - val_dice_coef: 0.8483
Epoch 160/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9518 - dice_coef: 0.9518 - val_loss: -0.8559 - val_dice_coef: 0.8559
Epoch 161/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9482 - dice_coef: 0.9482 - val_loss: -0.8550 - val_dice_coef: 0.8550
Epoch 162/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9461 - dice_coef: 0.9461 - val_loss: -0.8558 - val_dice_coef: 0.8558
Epoch 163/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9510 - dice_coef: 0.9510 - val_loss: -0.8475 - val_dice_coef: 0.8475
Epoch 164/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9488 - dice_coef: 0.9488 - val_loss: -0.8452 - val_dice_coef: 0.8452
Epoch 165/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9522 - dice_coef: 0.9522 - val_loss: -0.8509 - val_dice_coef: 0.8509
Epoch 166/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9521 - dice_coef: 0.9521 - val_loss: -0.8562 - val_dice_coef: 0.8562
Epoch 167/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9485 - dice_coef: 0.9485 - val_loss: -0.8567 - val_dice_coef: 0.8567
Epoch 168/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9459 - dice_coef: 0.9459 - val_loss: -0.8432 - val_dice_coef: 0.8432
Epoch 169/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9522 - dice_coef: 0.9522 - val_loss: -0.8534 - val_dice_coef: 0.8534
Epoch 170/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9552 - dice_coef: 0.9552 - val_loss: -0.8502 - val_dice_coef: 0.8502
Epoch 171/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9541 - dice_coef: 0.9541 - val_loss: -0.8489 - val_dice_coef: 0.8489
Epoch 172/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9511 - dice_coef: 0.9511 - val_loss: -0.8518 - val_dice_coef: 0.8518
Epoch 173/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9567 - dice_coef: 0.9567 - val_loss: -0.8502 - val_dice_coef: 0.8502
Epoch 174/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9554 - dice_coef: 0.9554 - val_loss: -0.8549 - val_dice_coef: 0.8549
Epoch 175/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9493 - dice_coef: 0.9493 - val_loss: -0.8537 - val_dice_coef: 0.8537
Epoch 176/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9534 - dice_coef: 0.9534 - val_loss: -0.8488 - val_dice_coef: 0.8488
Epoch 177/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9416 - dice_coef: 0.9416 - val_loss: -0.8398 - val_dice_coef: 0.8398
Epoch 178/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9531 - dice_coef: 0.9531 - val_loss: -0.8553 - val_dice_coef: 0.8553
Epoch 179/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9589 - dice_coef: 0.9589 - val_loss: -0.8548 - val_dice_coef: 0.8548
Epoch 180/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9559 - dice_coef: 0.9559 - val_loss: -0.8537 - val_dice_coef: 0.8537
Epoch 181/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9566 - dice_coef: 0.9566 - val_loss: -0.8508 - val_dice_coef: 0.8508
Epoch 182/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9566 - dice_coef: 0.9566 - val_loss: -0.8397 - val_dice_coef: 0.8397
Epoch 183/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9533 - dice_coef: 0.9533 - val_loss: -0.8466 - val_dice_coef: 0.8466
Epoch 184/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9586 - dice_coef: 0.9586 - val_loss: -0.8553 - val_dice_coef: 0.8553
Epoch 185/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9530 - dice_coef: 0.9530 - val_loss: -0.8516 - val_dice_coef: 0.8516
Epoch 186/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9569 - dice_coef: 0.9569 - val_loss: -0.8572 - val_dice_coef: 0.8572
Epoch 187/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9565 - dice_coef: 0.9565 - val_loss: -0.8431 - val_dice_coef: 0.8431
Epoch 188/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9560 - dice_coef: 0.9560 - val_loss: -0.8462 - val_dice_coef: 0.8462
Epoch 189/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9572 - dice_coef: 0.9572 - val_loss: -0.8389 - val_dice_coef: 0.8389
Epoch 190/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9581 - dice_coef: 0.9581 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 191/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9548 - dice_coef: 0.9548 - val_loss: -0.8541 - val_dice_coef: 0.8541
Epoch 192/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9436 - dice_coef: 0.9436 - val_loss: -0.8358 - val_dice_coef: 0.8358
Epoch 193/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9522 - dice_coef: 0.9522 - val_loss: -0.8530 - val_dice_coef: 0.8530
Epoch 194/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9540 - dice_coef: 0.9540 - val_loss: -0.8461 - val_dice_coef: 0.8461
Epoch 195/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9605 - dice_coef: 0.9605 - val_loss: -0.8526 - val_dice_coef: 0.8526
Epoch 196/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9636 - dice_coef: 0.9636 - val_loss: -0.8545 - val_dice_coef: 0.8545
Epoch 197/200
1452/1452 [==============================] - 63s 43ms/step - loss: -0.9643 - dice_coef: 0.9643 - val_loss: -0.8451 - val_dice_coef: 0.8451
Epoch 198/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9618 - dice_coef: 0.9618 - val_loss: -0.8521 - val_dice_coef: 0.8521
Epoch 199/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9627 - dice_coef: 0.9627 - val_loss: -0.8452 - val_dice_coef: 0.8452
Epoch 200/200
1452/1452 [==============================] - 63s 44ms/step - loss: -0.9570 - dice_coef: 0.9570 - val_loss: -0.8500 - val_dice_coef: 0.8500
In [11]:
    # Save the training history (a context manager ensures the file is closed;
    # the original `f.close` without parentheses never actually closed it)
    with open(dname_checkpoints + '/' + fname_history, 'wb') as f:
        pickle.dump(history.history, f)
In [14]:
#
#  TEST MODE
#
mode = 'TEST'
if mode == "TEST":
    # Prediction (test) mode

    # Load the trained weights
    epoch = 200
    fname_weights = 'model_weights_%02d.h5'%(epoch)
    fpath_weights = os.path.join(dname_checkpoints, fname_weights)
    model_fcn00.load_weights(fpath_weights)
    print('==> done')
==> done
In [15]:
    # Read Test Data
    fnames = load_fnames('data/list_test_01.txt')

    [fpaths_xs_test,fpaths_ys_test] = make_fnames(fnames,'data/img','data/mask','OperatorA_')

    X_test = load_imgs_asarray(fpaths_xs_test, grayscale=False, target_size=target_size,
                                dim_ordering=dim_ordering)
    #Y_test = load_imgs_asarray(fpaths_ys_test, grayscale=True, target_size=target_size,
    #                            dim_ordering=dim_ordering)

    # Load the mean and standard deviation computed at training time
    print('loading mean and standard deviation from ' + fname_stats + '...')
    stats = np.load(dname_checkpoints + '/' + fname_stats)
    mean = stats['mean']
    std = stats['std']
    print('==> mean: ' + str(mean))
    print('==> std : ' + str(std))

    for i in range(3):
        X_test[:, i] = (X_test[:, i] - mean[i]) / std[i]
    print('==> done')
loading mean and standard deviation from stats01.npz...
==> mean: [130.65465  91.2685   76.63643]
==> std : [55.2817   43.990963 43.113483]
==> done
In [16]:
    # Run prediction on the test data
    outputs = model_fcn00.predict(X_test)
    #outputs = model_fcn02.predict(X_test)
In [17]:
    # Save the outputs as images
    dname_outputs = './outputs/'
    if not os.path.isdir(dname_outputs):
        print('create directory: %s'%(dname_outputs))
        os.mkdir(dname_outputs)

    print('saving outputs as images...')
    for i, array in enumerate(outputs):
        array = np.where(array > 0.5, 1, 0)  # binarize at 0.5
        array = array.astype(np.float32)
        img_out = array_to_img(array, dim_ordering)
        # fpath_out = os.path.join(dname_outputs, fnames[i])
        fpath_out = os.path.join(dname_outputs, "%05d.png" % i)
        img_out.save(fpath_out)

    print('==> done')
saving outputs as images...
==> done
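
The same masks could be written without the Keras helper by thresholding the single-channel probability map directly; a sketch, assuming outputs has channels_first shape (N, 1, H, W) and using an illustrative file name:

from PIL import Image

mask = (outputs[0][0] > 0.5).astype(np.uint8) * 255  # (H, W) binary mask scaled to 0/255
Image.fromarray(mask).save('example_mask.png')       # illustrative file name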
In [20]:
    from PIL import Image
    import matplotlib.pyplot as plt

    dice_eval = []

    for i in range(len(fpaths_xs_test)):
        # Test image
        im1 = Image.open(fpaths_xs_test[i])
        im1 = im1.resize((320, 240))
        # Network output
        im2 = Image.open(os.path.join(dname_outputs, "%05d.png" % i))
        im2 = im2.resize((320, 240))
        # Ground truth
        im3 = Image.open(fpaths_ys_test[i])
        im3 = im3.resize((320, 240))

        # Overlay: network output in the red channel, ground truth in green
        im2_d = np.zeros((240, 320, 3), 'uint8')
        im2_d[:, :, 0] = np.array(im2)
        im2_d[:, :, 1] = np.array(im3) * 255
        im2_d[:, :, 2] = 0

        # Compute the Dice coefficient: 2|A∩B| / (|A| + |B|)
        im2a = np.array(im2)
        im2a[im2a > 0] = 1
        im3a = np.array(im3)
        im3a[im3a > 0] = 1

        dice = 2 * np.sum(im2a * im3a) / (np.sum(im2a) + np.sum(im3a))
        print('%03d: Dice Coeff = %f' % (i, dice))
        print('%f' % img_dice_coeff(im2, im3))
        dice_eval.append(dice)

        plt.imshow(np.hstack((np.array(im1), np.array(im2_d))))
        plt.show()

    print('Dice eval av. : %f' % np.mean(np.array(dice_eval)))
000: Dice Coeff = 0.906205
0.906205
001: Dice Coeff = 0.686357
0.686357
002: Dice Coeff = 0.843728
0.843728
003: Dice Coeff = 0.827191
0.827191
004: Dice Coeff = 0.683859
0.683859
005: Dice Coeff = 0.647564
0.647564
006: Dice Coeff = 0.751790
0.751790
007: Dice Coeff = 0.726744
0.726744
008: Dice Coeff = 0.887689
0.887689
009: Dice Coeff = 0.866091
0.866091
010: Dice Coeff = 0.943221
0.943221
011: Dice Coeff = 0.576271
0.576271
012: Dice Coeff = 0.900179
0.900179
013: Dice Coeff = 0.928693
0.928693
014: Dice Coeff = 0.577568
0.577568
015: Dice Coeff = 0.883039
0.883039
016: Dice Coeff = 0.765189
0.765189
017: Dice Coeff = 0.884438
0.884438
018: Dice Coeff = 0.810642
0.810642
019: Dice Coeff = 0.905350
0.905350
020: Dice Coeff = 0.886776
0.886776
021: Dice Coeff = 0.906096
0.906096
022: Dice Coeff = 0.877193
0.877193
023: Dice Coeff = 0.917443
0.917443
024: Dice Coeff = 0.917799
0.917799
025: Dice Coeff = 0.902755
0.902755
026: Dice Coeff = 0.934988
0.934988
027: Dice Coeff = 0.883817
0.883817
028: Dice Coeff = 0.900958
0.900958
029: Dice Coeff = 0.790347
0.790347
030: Dice Coeff = 0.924180
0.924180
031: Dice Coeff = 0.937360
0.937360
032: Dice Coeff = 0.874859
0.874859
033: Dice Coeff = 0.720195
0.720195
034: Dice Coeff = 0.921507
0.921507
035: Dice Coeff = 0.857895
0.857895
036: Dice Coeff = 0.906964
0.906964
037: Dice Coeff = 0.874414
0.874414
038: Dice Coeff = 0.751807
0.751807
039: Dice Coeff = 0.848967
0.848967
040: Dice Coeff = 0.890263
0.890263
041: Dice Coeff = 0.923518
0.923518
042: Dice Coeff = 0.906360
0.906360
043: Dice Coeff = 0.829448
0.829448
044: Dice Coeff = 0.918919
0.918919
045: Dice Coeff = 0.876296
0.876296
046: Dice Coeff = 0.922123
0.922123
047: Dice Coeff = 0.922873
0.922873
048: Dice Coeff = 0.925942
0.925942
049: Dice Coeff = 0.816038
0.816038
050: Dice Coeff = 0.904387
0.904387
051: Dice Coeff = 0.865747
0.865747
052: Dice Coeff = 0.879545
0.879545
053: Dice Coeff = 0.921171
0.921171
054: Dice Coeff = 0.792105
0.792105
055: Dice Coeff = 0.751361
0.751361
056: Dice Coeff = 0.899866
0.899866
057: Dice Coeff = 0.909780
0.909780
058: Dice Coeff = 0.932331
0.932331
059: Dice Coeff = 0.897155
0.897155
060: Dice Coeff = 0.817308
0.817308
061: Dice Coeff = 0.913408
0.913408
062: Dice Coeff = 0.899676
0.899676
063: Dice Coeff = 0.854093
0.854093
064: Dice Coeff = 0.908382
0.908382
065: Dice Coeff = 0.855914
0.855914
066: Dice Coeff = 0.743363
0.743363
067: Dice Coeff = 0.863795
0.863795
068: Dice Coeff = 0.835938
0.835938
069: Dice Coeff = 0.678442
0.678442
070: Dice Coeff = 0.840672
0.840672
071: Dice Coeff = 0.929220
0.929220
072: Dice Coeff = 0.886473
0.886473
073: Dice Coeff = 0.872771
0.872771
074: Dice Coeff = 0.861472
0.861472
075: Dice Coeff = 0.882416
0.882416
076: Dice Coeff = 0.837143
0.837143
077: Dice Coeff = 0.724638
0.724638
078: Dice Coeff = 0.768622
0.768622
079: Dice Coeff = 0.789579
0.789579
080: Dice Coeff = 0.658738
0.658738
081: Dice Coeff = 0.756439
0.756439
082: Dice Coeff = 0.552000
0.552000
083: Dice Coeff = 0.783708
0.783708
084: Dice Coeff = 0.822695
0.822695
085: Dice Coeff = 0.864564
0.864564
086: Dice Coeff = 0.863675
0.863675
087: Dice Coeff = 0.702564
0.702564
088: Dice Coeff = 0.942029
0.942029
089: Dice Coeff = 0.887417
0.887417
090: Dice Coeff = 0.867955
0.867955
091: Dice Coeff = 0.875779
0.875779
092: Dice Coeff = 0.884243
0.884243
093: Dice Coeff = 0.812500
0.812500
094: Dice Coeff = 0.904184
0.904184
095: Dice Coeff = 0.751342
0.751342
096: Dice Coeff = 0.758422
0.758422
097: Dice Coeff = 0.899396
0.899396
098: Dice Coeff = 0.729002
0.729002
099: Dice Coeff = 0.783505
0.783505
100: Dice Coeff = 0.841905
0.841905
101: Dice Coeff = 0.738397
0.738397
102: Dice Coeff = 0.801712
0.801712
103: Dice Coeff = 0.533729
0.533729
104: Dice Coeff = 0.830022
0.830022
105: Dice Coeff = 0.783034
0.783034
106: Dice Coeff = 0.733017
0.733017
107: Dice Coeff = 0.820762
0.820762
108: Dice Coeff = 0.756463
0.756463
109: Dice Coeff = 0.730117
0.730117
110: Dice Coeff = 0.820084
0.820084
111: Dice Coeff = 0.908356
0.908356
112: Dice Coeff = 0.775978
0.775978
113: Dice Coeff = 0.942123
0.942123
114: Dice Coeff = 0.849920
0.849920
115: Dice Coeff = 0.530612
0.530612
116: Dice Coeff = 0.854111
0.854111
117: Dice Coeff = 0.628571
0.628571
118: Dice Coeff = 0.855057
0.855057
119: Dice Coeff = 0.903061
0.903061
120: Dice Coeff = 0.827670
0.827670
121: Dice Coeff = 0.866213
0.866213
122: Dice Coeff = 0.843185
0.843185
123: Dice Coeff = 0.922631
0.922631
124: Dice Coeff = 0.833333
0.833333
125: Dice Coeff = 0.764833
0.764833
126: Dice Coeff = 0.726225
0.726225
127: Dice Coeff = 0.915033
0.915033
128: Dice Coeff = 0.641176
0.641176
129: Dice Coeff = 0.901679
0.901679
130: Dice Coeff = 0.816020
0.816020
131: Dice Coeff = 0.850455
0.850455
132: Dice Coeff = 0.802239
0.802239
133: Dice Coeff = 0.871197
0.871197
134: Dice Coeff = 0.756393
0.756393
135: Dice Coeff = 0.916908
0.916908
136: Dice Coeff = 0.851175
0.851175
137: Dice Coeff = 0.194393
0.194393
138: Dice Coeff = 0.865154
0.865154
139: Dice Coeff = 0.759690
0.759690
140: Dice Coeff = 0.890529
0.890529
141: Dice Coeff = 0.000000
0.000000
142: Dice Coeff = 0.811659
0.811659
143: Dice Coeff = 0.445055
0.445055
144: Dice Coeff = 0.938578
0.938578
145: Dice Coeff = 0.829396
0.829396
146: Dice Coeff = 0.870653
0.870653
147: Dice Coeff = 0.498010
0.498010
148: Dice Coeff = 0.276596
0.276596
149: Dice Coeff = 0.781250
0.781250
150: Dice Coeff = 0.488189
0.488189
151: Dice Coeff = 0.788609
0.788609
152: Dice Coeff = 0.762980
0.762980
153: Dice Coeff = 0.501193
0.501193
154: Dice Coeff = 0.753695
0.753695
155: Dice Coeff = 0.862843
0.862843
156: Dice Coeff = 0.917749
0.917749
157: Dice Coeff = 0.669433
0.669433
158: Dice Coeff = 0.903135
0.903135
159: Dice Coeff = 0.827443
0.827443
160: Dice Coeff = 0.844508
0.844508
161: Dice Coeff = 0.693642
0.693642
162: Dice Coeff = 0.769640
0.769640
163: Dice Coeff = 0.903401
0.903401
164: Dice Coeff = 0.501718
0.501718
165: Dice Coeff = 0.580645
0.580645
166: Dice Coeff = 0.931241
0.931241
167: Dice Coeff = 0.663866
0.663866
168: Dice Coeff = 0.840215
0.840215
169: Dice Coeff = 0.800463
0.800463
170: Dice Coeff = 0.032389
0.032389
171: Dice Coeff = 0.686071
0.686071
172: Dice Coeff = 0.598319
0.598319
173: Dice Coeff = 0.765492
0.765492
174: Dice Coeff = 0.483436
0.483436
175: Dice Coeff = 0.668020
0.668020
176: Dice Coeff = 0.724919
0.724919
177: Dice Coeff = 0.847656
0.847656
178: Dice Coeff = 0.784848
0.784848
179: Dice Coeff = 0.823438
0.823438
180: Dice Coeff = 0.862569
0.862569
181: Dice Coeff = 0.779006
0.779006
182: Dice Coeff = 0.756286
0.756286
183: Dice Coeff = 0.648253
0.648253
184: Dice Coeff = 0.824140
0.824140
185: Dice Coeff = 0.710458
0.710458
186: Dice Coeff = 0.621375
0.621375
187: Dice Coeff = 0.000000
0.000000
188: Dice Coeff = 0.807179
0.807179
189: Dice Coeff = 0.630915
0.630915
190: Dice Coeff = 0.818148
0.818148
191: Dice Coeff = 0.621064
0.621064
192: Dice Coeff = 0.000000
0.000000
193: Dice Coeff = 0.619952
0.619952
194: Dice Coeff = 0.333789
0.333789
195: Dice Coeff = 0.704067
0.704067
196: Dice Coeff = 0.319838
0.319838
197: Dice Coeff = 0.704467
0.704467
198: Dice Coeff = 0.735243
0.735243
199: Dice Coeff = 0.852066
0.852066
200: Dice Coeff = 0.847682
0.847682
201: Dice Coeff = 0.627383
0.627383
202: Dice Coeff = 0.661373
0.661373
203: Dice Coeff = 0.780370
0.780370
204: Dice Coeff = 0.808642
0.808642
205: Dice Coeff = 0.862295
0.862295
206: Dice Coeff = 0.825489
0.825489
207: Dice Coeff = 0.958606
0.958606
208: Dice Coeff = 0.615776
0.615776
209: Dice Coeff = 0.815331
0.815331
210: Dice Coeff = 0.943516
0.943516
211: Dice Coeff = 0.914541
0.914541
212: Dice Coeff = 0.863636
0.863636
213: Dice Coeff = 0.756364
0.756364
214: Dice Coeff = 0.801790
0.801790
215: Dice Coeff = 0.947406
0.947406
216: Dice Coeff = 0.736508
0.736508
217: Dice Coeff = 0.878912
0.878912
218: Dice Coeff = 0.829538
0.829538
219: Dice Coeff = 0.939987
0.939987
220: Dice Coeff = 0.808149
0.808149
221: Dice Coeff = 0.828947
0.828947
222: Dice Coeff = 0.850700
0.850700
223: Dice Coeff = 0.842224
0.842224
224: Dice Coeff = 0.678873
0.678873
225: Dice Coeff = 0.728814
0.728814
226: Dice Coeff = 0.885751
0.885751
227: Dice Coeff = 0.840441
0.840441
228: Dice Coeff = 0.792373
0.792373
229: Dice Coeff = 0.667656
0.667656
230: Dice Coeff = 0.851652
0.851652
231: Dice Coeff = 0.857453
0.857453
232: Dice Coeff = 0.666667
0.666667
233: Dice Coeff = 0.856549
0.856549
234: Dice Coeff = 0.835979
0.835979
235: Dice Coeff = 0.803015
0.803015
236: Dice Coeff = 0.538012
0.538012
237: Dice Coeff = 0.692629
0.692629
238: Dice Coeff = 0.951886
0.951886
239: Dice Coeff = 0.742308
0.742308
240: Dice Coeff = 0.745921
0.745921
241: Dice Coeff = 0.680473
0.680473
242: Dice Coeff = 0.538619
0.538619
243: Dice Coeff = 0.826751
0.826751
244: Dice Coeff = 0.771784
0.771784
245: Dice Coeff = 0.931854
0.931854
246: Dice Coeff = 0.824916
0.824916
247: Dice Coeff = 0.932739
0.932739
248: Dice Coeff = 0.798061
0.798061
249: Dice Coeff = 0.690998
0.690998
250: Dice Coeff = 0.614340
0.614340
251: Dice Coeff = 0.584551
0.584551
252: Dice Coeff = 0.521327
0.521327
253: Dice Coeff = 0.654919
0.654919
254: Dice Coeff = 0.792671
0.792671
255: Dice Coeff = 0.760050
0.760050
256: Dice Coeff = 0.798535
0.798535
257: Dice Coeff = 0.880620
0.880620
258: Dice Coeff = 0.621664
0.621664
259: Dice Coeff = 0.904339
0.904339
260: Dice Coeff = 0.000000
0.000000
261: Dice Coeff = 0.558266
0.558266
262: Dice Coeff = 0.720174
0.720174
263: Dice Coeff = 0.770718
0.770718
264: Dice Coeff = 0.383275
0.383275
265: Dice Coeff = 0.821566
0.821566
266: Dice Coeff = 0.650367
0.650367
267: Dice Coeff = 0.637079
0.637079
268: Dice Coeff = 0.003210
0.003210
269: Dice Coeff = 0.620908
0.620908
Dice eval av. : 0.770130
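
The in-loop Dice above divides by |A| + |B| without smoothing, so a frame where both the prediction and the ground truth are empty would divide by zero. A guarded helper (a sketch; by convention it returns 1 when both masks are empty):

def dice_binary(pred, gt):
    # Dice = 2|A∩B| / (|A| + |B|), defined as 1.0 when both masks are empty.
    pred = np.asarray(pred) > 0
    gt = np.asarray(gt) > 0
    denom = pred.sum() + gt.sum()
    return 1.0 if denom == 0 else 2.0 * np.logical_and(pred, gt).sum() / denom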
In [19]:
#
#   Show History
#
mode = "SHOW_HISTORY"
if mode == "SHOW_HISTORY":
    import matplotlib.pyplot as plt

    # Load the pickled training history and plot each recorded metric
    print(dname_checkpoints + '/' + fname_history)
    with open(dname_checkpoints + '/' + fname_history, 'rb') as f:
        history = pickle.load(f)

    for k in history.keys():
        plt.plot(history[k])
        plt.title(k)
        plt.show()
checkpoints_fcn00/history.pkl